$(RM) *.a *.so *.o *.rpm $(BIN)
%: %.c $(HDRS) Makefile
- $(CC) $(CFLAGS) -o $@ $< -lxc
+ $(CC) $(CFLAGS) -o $@ $< -L../xc/lib -lxc
/******************************************************************************
- *
* tools/xentrace/xentrace.c
*
* Tool for collecting trace buffer data from Xen.
*
* Author: Mark Williamson, mark.a.williamson@intel.com
* Date: February 2004
- *
- *****************************************************************************/
+ */
#include <time.h>
#include <stdlib.h>
#include "../xc/lib/xc_private.h"
-#define TRACE_BUFFER /* need to define this for trace.h */
#include <xeno/trace.h>
extern FILE *stdout;
* be dereferenced immediately, since it is a physical address of memory in Xen
* space - they are used in this program to mmap the right area from /dev/mem.
*/
-struct t_buf *get_tbuf_ptrs()
+unsigned long get_tbuf_ptrs(void)
{
int ret;
dom0_op_t op; /* dom0 op we'll build */
xc_interface_close(xc_handle);
- if(ret) {
+ if ( ret != 0 )
+ {
PERROR("Failure to get trace buffer pointer from Xen");
exit(EXIT_FAILURE);
}
* address space by memory mapping /dev/mem. Returns a pointer to the location
* the buffers have been mapped to.
*/
-struct t_buf *map_tbufs(struct t_buf *tbufs_phys)
+struct t_buf *map_tbufs(unsigned long tbufs_phys)
{
int dm_fd; /* file descriptor for /dev/mem */
struct t_buf *tbufs_mapped;
dm_fd = open("/dev/mem", O_RDONLY);
-
- if(dm_fd < 0) {
+ if ( dm_fd < 0 )
+ {
PERROR("Open /dev/mem when mapping trace buffers\n");
exit(EXIT_FAILURE);
}
close(dm_fd);
- if(tbufs_mapped == MAP_FAILED) {
+ if ( tbufs_mapped == MAP_FAILED )
+ {
PERROR("Failed to mmap trace buffers");
exit(EXIT_FAILURE);
}
struct t_buf **user_ptrs;
user_ptrs = (struct t_buf **)calloc(opts.num_cpus, sizeof(struct t_buf *));
-
- if(!user_ptrs) {
+ if ( user_ptrs == NULL )
+ {
PERROR( "Failed to allocate memory for buffer pointers\n");
exit(EXIT_FAILURE);
}
/* initialise pointers to the trace buffers - given the size of a trace
* buffer and the value of bufs_maped, we can easily calculate these */
- for(i = 0; i<opts.num_cpus; i++)
+ for ( i = 0; i < opts.num_cpus; i++ )
user_ptrs[i] = (struct t_buf *)(
(unsigned long)bufs_mapped + TB_SIZE * i);
* mapped in user space. Note that the trace buffer metadata contains physical
* pointers - the array returned allows more convenient access to them.
*/
-struct t_rec **init_rec_ptrs(struct t_buf *tbufs_phys,
+struct t_rec **init_rec_ptrs(unsigned long tbufs_phys,
struct t_buf *tbufs_mapped,
struct t_buf **meta)
{
struct t_rec **data;
data = calloc(opts.num_cpus, sizeof(struct t_rec *));
-
- if(!data) {
- PERROR( "Failed to allocate memory for data pointers\n");
+ if ( data == NULL )
+ {
+ PERROR("Failed to allocate memory for data pointers\n");
exit(EXIT_FAILURE);
}
- for(i = 0; i<opts.num_cpus; i++) {
- data[i] = (struct t_rec *)(
- (unsigned long)meta[i]->data
- - (unsigned long)tbufs_phys
- + (unsigned long)tbufs_mapped
- );
- }
+ for ( i = 0; i < opts.num_cpus; i++ )
+ data[i] = (struct t_rec *)((unsigned long)meta[i]->data -
+ tbufs_phys + (unsigned long)tbufs_mapped);
return data;
}
int i;
int *tails = calloc(opts.num_cpus, sizeof(unsigned int));
- if(!tails) {
+ if ( tails == NULL )
+ {
PERROR("Failed to allocate memory for tail pointers\n");
exit(EXIT_FAILURE);
}
- for(i = 0; i<opts.num_cpus; i++)
+ for ( i = 0; i < opts.num_cpus; i++ )
tails[i] = bufs[i]->head;
return tails;
struct t_rec **data; /* pointers to the trace buffer data areas
* where they are mapped into user space. */
int *tails; /* store tail indexes for the trace buffers */
- struct t_buf *tbufs_phys; /* physical address of the tbufs */
+ unsigned long tbufs_phys; /* physical address of the tbufs */
/* setup access to trace buffers */
tbufs_phys = get_tbuf_ptrs();
tails = init_tail_idxs (meta);
/* now, scan buffers for events */
- while(!interrupted) {
- for(i = 0; i < opts.num_cpus; i++) {
+ while ( !interrupted )
+ {
+ for ( i = 0; i < opts.num_cpus; i++ )
+ {
signed long newdata = meta[i]->head - tails[i];
signed long prewrap = newdata;
/* correct newdata and prewrap in case of a pointer wrap */
- if(newdata < 0) {
+ if ( newdata < 0 )
+ {
newdata += meta[i]->size;
prewrap = meta[i]->size - tails[i];
}
- if(newdata >= opts.new_data_thresh) {
+ if ( newdata >= opts.new_data_thresh )
+ {
/* output pre-wrap data */
for(j = 0; j < prewrap; j++)
print_rec(i, data[i] + tails[i] + j, logfile);
{
settings_t *setup = (settings_t *)state->input;
- switch(key)
+ switch ( key )
{
case 't': /* set new records threshold for logging */
{
char *inval;
setup->new_data_thresh = strtol(arg, &inval, 0);
- if(inval == arg) argp_usage(state);
+ if ( inval == arg )
+ argp_usage(state);
}
-
break;
case 's': /* set sleep time (given in milliseconds) */
{
char *inval;
setup->poll_sleep = millis_to_timespec(strtol(arg, &inval, 0));
- if(inval == arg) argp_usage(state);
+ if ( inval == arg )
+ argp_usage(state);
}
break;
{
char *inval;
setup->num_cpus = strtol(arg, &inval, 0);
- if(inval == arg) argp_usage(state);
+ if ( inval == arg )
+ argp_usage(state);
}
break;
case ARGP_KEY_ARG:
- if(state->arg_num == 0)
+ {
+ if ( state->arg_num == 0 )
setup->outfile = arg;
else
argp_usage(state);
- break;
+ }
+ break;
default:
return ARGP_ERR_UNKNOWN;
argp_parse(&parser_def, argc, argv, 0, 0, &opts);
- if(opts.outfile) {
+ if ( opts.outfile )
logfile = fopen(opts.outfile, "w");
- }
/* ensure that if we get a signal, we'll do cleanup, then exit */
sigaction(SIGHUP, &act, 0);
watchdog_on = 1;
-#ifdef TRACE_BUFFER
- init_trace_bufs(); /* initialise trace buffers */
-#endif
+ init_trace_bufs();
}
/******************************************************************************
- *
* common/trace.c
*
* Xen Trace Buffer
*
* See also include/xeno/trace.h and the dom0 op in
* include/hypervisor-ifs/dom0_ops.h
- *
- *****************************************************************************/
+ */
#include <xeno/config.h>
-#ifdef TRACE_BUFFER /* don't compile this stuff in unless explicitly enabled */
+#ifdef TRACE_BUFFER
#include <asm/timex.h>
#include <asm/types.h>
#include <xeno/trace.h>
#include <asm/atomic.h>
-
-
/* Pointers to the meta-data objects for all system trace buffers */
struct t_buf *t_bufs[NR_CPUS];
/* a flag recording whether initialisation has been done */
-atomic_t tb_init_done = ATOMIC_INIT(0);
-
+int tb_init_done = 0;
/**
* init_trace_bufs - performs initialisation of the per-cpu trace buffers.
* trace buffers. The trace buffers are then available for debugging use, via
* the %TRACE_xD macros exported in <xeno/trace.h>.
*/
-void init_trace_bufs()
+void init_trace_bufs(void)
{
- int i;
- void *d; /* trace buffer area pointer */
+ int i;
+ char *rawbuf;
+ struct t_buf *buf;
- d = kmalloc(smp_num_cpus * TB_SIZE, GFP_KERNEL);
-
- if( d == NULL ) {
+ if ( (rawbuf = kmalloc(smp_num_cpus * TB_SIZE, GFP_KERNEL)) == NULL )
+ {
printk("Xen trace buffers: memory allocation failed\n");
return;
}
- for(i = 0; i < smp_num_cpus; i++) {
- struct t_buf *buf = t_bufs[i]
- = (struct t_buf *)( (unsigned int)d + TB_SIZE * i );
+ for ( i = 0; i < smp_num_cpus; i++ )
+ {
+ buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*TB_SIZE];
- /* for use in Xen */
- buf->vdata = (struct t_rec *)
- ( (unsigned int)buf + sizeof(struct t_buf) );
+ /* For use in Xen. */
+ buf->vdata = (struct t_rec *)(buf+1);
buf->head_ptr = buf->vdata;
- spin_lock_init(&buf->lock);
+ spin_lock_init(&buf->lock);
- /* for use in user space */
+ /* For use in user space. */
buf->data = (struct t_rec *)__pa(buf->vdata);
- buf->head = 0;
+ buf->head = 0;
- /* for use in both */
- buf->size = (TB_SIZE - sizeof(struct t_buf)) / sizeof(struct t_rec);
+ /* For use in both. */
+ buf->size = (TB_SIZE - sizeof(struct t_buf)) / sizeof(struct t_rec);
}
printk("Xen trace buffers: initialised\n");
wmb(); /* above must be visible before tb_init_done flag set */
- atomic_set(&tb_init_done, 1);
+ tb_init_done = 1;
}
-
-
/**
* get_tb_ptr - return physical address of the trace buffers.
*
* Called by the %DOM0_GETTBUFS dom0 op to fetch the physical address of the
* trace buffers.
*/
-struct t_buf *get_tb_ptr()
+unsigned long get_tb_ptr(void)
{
- /* a physical address (user space maps this using /dev/mem) */
- return (struct t_buf *)__pa(t_bufs[0]);
+ /* Return the physical address. */
+ return __pa(t_bufs[0]);
}
-#endif /* #ifdef TRACE_BUFFER */
+#endif /* TRACE_BUFFER */
typedef struct dom0_gettbufs_st
{
/* OUT variable - location of the trace buffers */
- struct t_buf *phys_addr;
+ unsigned long phys_addr;
} dom0_gettbufs_t;
typedef struct dom0_op_st
} u;
} dom0_op_t;
-
-
-
-
-#endif
+#endif /* __DOM0_OPS_H__ */
/******************************************************************************
- *
* include/xeno/trace.h
*
* Xen Trace Buffer
* trace buffer contents can then be performed using a userland tool.
*
* See also common/trace.c and the dom0 op in include/hypervisor-ifs/dom0_ops.h
- *
- *****************************************************************************/
-
-#ifdef TRACE_BUFFER
+ */
#ifndef __XENO_TRACE_H__
#define __XENO_TRACE_H__
-#ifdef __KERNEL__
-
-#include <xeno/spinlock.h>
-#include <asm/page.h>
-#include <xeno/types.h>
-#include <xeno/sched.h>
-#include <asm/atomic.h>
-#include <asm/current.h>
-#include <asm/msr.h>
-
-#endif /* #ifdef __KERNEL__ */
-
-/******************************************************************************
- * Data structure declarations
- *****************************************************************************/
+/*
+ * How much space is allowed for a single trace buffer, including data and
+ * metadata (and maybe some waste).
+ */
+#define TB_SIZE PAGE_SIZE
/* This structure represents a single trace buffer record. */
struct t_rec {
u32 d1, d2, d3, d4, d5; /* event data items */
};
-/* This structure contains the metadata for a single trace buffer. The head
+/*
+ * This structure contains the metadata for a single trace buffer. The head
* field, indexes into an array of struct t_rec's.
*/
struct t_buf {
struct t_rec *data; /* pointer to data area. physical address
- * for convenience in user space code */
+ * for convenience in user space code */
unsigned int size; /* size of the data area, in t_recs */
unsigned int head; /* array index of the most recent record */
#ifdef __KERNEL__
struct t_rec *head_ptr; /* pointer to the head record */
- struct t_rec *vdata; /* virtual address pointer to data,
- * for use in Xen */
+ struct t_rec *vdata; /* virtual address pointer to data */
spinlock_t lock; /* ensure mutually exlusive access (for inserts) */
-#endif /* #ifdef __KERNEL__ */
+#endif
/* never add anything here - the kernel stuff must be the last elements */
};
-/******************************************************************************
- * Functions
- *****************************************************************************/
+#ifdef TRACE_BUFFER
-#ifdef __KERNEL__
+#include <xeno/spinlock.h>
+#include <asm/page.h>
+#include <xeno/types.h>
+#include <xeno/sched.h>
+#include <asm/atomic.h>
+#include <asm/current.h>
+#include <asm/msr.h>
/* Used to initialise trace buffer functionality */
-void init_trace_bufs();
+void init_trace_bufs(void);
/* used to retrieve the physical address of the trace buffers */
-struct t_buf *get_tb_ptr();
+struct t_buf *get_tb_ptr(void);
/**
* trace - Enters a trace tuple into the trace buffer for the current CPU.
static inline int trace(u32 event, u32 d1, u32 d2, u32 d3, u32 d4, u32 d5)
{
extern struct t_buf *t_bufs[]; /* global array of pointers to bufs */
- extern atomic_t tb_init_done; /* set when buffers are initialised */
+ extern int tb_init_done; /* set when buffers are initialised */
unsigned long flags; /* for saving interrupt flags */
struct t_buf *buf; /* the buffer we're working on */
struct t_rec *rec; /* next record to fill out */
- if(!atomic_read(&tb_init_done)) return -1;
+ if ( !tb_init_done )
+ return -1;
buf = t_bufs[smp_processor_id()];
rec = buf->head_ptr;
spin_lock_irqsave(&buf->lock, flags);
- /* interrupts _disabled locally_ during the following code */
rdtscll(rec->cycles);
rec->event = event;
wmb(); /* above must be visible before reader sees index updated */
- if( likely( buf->head_ptr < ( buf->vdata + buf->size - 1) ) ) {
+ if ( likely(buf->head_ptr < (buf->vdata + buf->size - 1)) )
+ {
buf->head_ptr++;
buf->head++;
- } else {
- buf->head = 0;
+ }
+ else
+ {
+ buf->head = 0;
buf->head_ptr = buf->vdata;
}
spin_unlock_irqrestore(&buf->lock, flags);
- /* Interrupts now _re-enabled locally_ */
return 0;
}
-
-#endif /* #ifdef __KERNEL__ */
-
-
-/******************************************************************************
- * Macros
- *****************************************************************************/
-
-/* How much space is allowed for a single trace buffer, including data and
- * metadata (and maybe some waste).
- */
-#define TB_SIZE PAGE_SIZE
-
-#ifdef __KERNEL__
-
-/* avoids troubling the caller with casting their arguments to a trace macro */
+/* Avoids troubling the caller with casting their arguments to a trace macro */
#define trace_do_casts(e,d1,d2,d3,d4,d5) \
trace(e, \
- (unsigned long)d1, \
- (unsigned long)d2, \
- (unsigned long)d3, \
- (unsigned long)d4, \
- (unsigned long)d5)
+ (unsigned long)d1, \
+ (unsigned long)d2, \
+ (unsigned long)d3, \
+ (unsigned long)d4, \
+ (unsigned long)d5)
-/* convenience macros for calling the trace function */
+/* Convenience macros for calling the trace function. */
#define TRACE_0D(event) trace_do_casts(event,0, 0, 0, 0, 0 )
#define TRACE_1D(event,d) trace_do_casts(event,d, 0, 0, 0, 0 )
#define TRACE_2D(event,d1,d2) trace_do_casts(event,d1,d2,0, 0, 0 )
#define TRACE_4D(event,d1,d2,d3,d4) trace_do_casts(event,d1,d2,d3,d4,0 )
#define TRACE_5D(event,d1,d2,d3,d4,d5) trace_do_casts(event,d1,d2,d3,d4,d5)
-#endif /* #ifdef __KERNEL__ */
+#else
-#endif /* #ifndef __XENO_TRACE_H__ */
+#define init_trace_bufs() ((void)0)
-#else /* #ifdef TRACE_BUFFER */
-
-/* define out macros so that they can be left in code when tracing is disabled */
#define TRACE_0D(event) ((void)0)
#define TRACE_1D(event,d) ((void)0)
#define TRACE_2D(event,d1,d2) ((void)0)
#define TRACE_4D(event,d1,d2,d3,d4) ((void)0)
#define TRACE_5D(event,d1,d2,d3,d4,d5) ((void)0)
-#endif /* #ifdef TRACE_BUFFER */
+#endif /* TRACE_BUFFER */
+
+#endif /* __XENO_TRACE_H__ */